from pathlib import Path
import logging
import os
import functools
import re
import pprint as pp
import datetime
import warnings
import yaml
import numpy as np
import pandas as pd
import scipy.stats
import fitgrid
import fitgrid.utils as fgutil
import matplotlib as mpl
from matplotlib import pyplot as plt
from udck19_utils import (
formula_to_name,
plotchans,
MPL_32_CHAN,
MPL_MIDLINE,
read_fg_summaries_hdf,
panel_from_idx
)
from udck19_filenames import (
EEG_EPOCHS_DIR, EEG_MODELING_DIR,
PREPOCHS_TRMD_EEG_F,
)
from udck19_utils import (
get_udck19_logger,
check_ENV,
N_EPOCH_SAMPS, # epoch length in samples
N_TRMD_EEG_EPOCHS, # number of epochs after EEG screening in pipeline_1
EEG_SCREEN_COL, # HDF5 dataset key
EEG_EXPT_SPECS,
EEG_26_STREAMS,
RHS_VARS,
LMER_MODELS,
LMER_MODELS_BY_EXPT,
check_epochs_shape,
standardize,
fit_lmer_formulas,
udck19_figsave,
)
# enforce active conda env
check_ENV()
# logging config
# NOTE(review): notebook hack -- __file__ is reassigned so the logger is named
# after the notebook file rather than the kernel; confirm intentional before reuse.
__file__ = 'udck19_pipeline_6.ipynb'
# flush/close any handlers left over from a previous notebook run before re-creating
logging.shutdown()
LOGGER = get_udck19_logger(__file__)
# wall-clock start, reported again at the end of the pipeline
pipeline_start = datetime.datetime.now()
LOGGER.info(f"""
udck19_pipeline_6
CONDA_DEFAULT_ENV: {os.environ['CONDA_DEFAULT_ENV']}
pandas: {pd.__version__}
fitgrid: {fitgrid.__version__}
Start {pipeline_start.strftime("%d.%b %Y %H:%M:%S")}
""")
# PRERUN toggles a fast low-resolution pass: 4 midline channels, every 5th
# sample in -200..600 ms, outputs written to a "prerun" subdirectory with a
# distinguishing filename prefix. The full run uses all times and channels.
PRERUN = False
if PRERUN:
    step = 5
    # all epochs, every `step`-th Time sample in the analysis window
    time_slice = pd.IndexSlice[:, slice(-200, 600, step)]
    # fix: original line had a duplicated assignment (LMER_CHANNELS = LMER_CHANNELS = ...)
    LMER_CHANNELS = ['MiPf', 'MiCe', 'MiPa', 'MiOc']
    modl_path = EEG_MODELING_DIR / "prerun"
    pfx = f'step{step}_chans{len(LMER_CHANNELS)}_'
    N_CORES = 21  # number of timepoints
else:
    time_slice = pd.IndexSlice[:, :]
    LMER_CHANNELS = EEG_26_STREAMS
    modl_path = EEG_MODELING_DIR
    pfx = ""
    N_CORES = 24
# for plots
mpl.rcParams['figure.max_open_warning'] = 30
style = 'seaborn-bright'
plt.style.use(style)
FIG_COUNT = 1  # running figure counter, incremented by udck19_figsave
FIG_PREFIX = 'udck19_pipeline_6_Fig'
# fitted model summaries from the original analysis (pipeline_5)
LMER_ACZ_RANEF_F = modl_path / f"{pfx}lmer_acz_ranef.h5"
# summary files written by this pipeline
LMER_LURKING_F = modl_path / f"{pfx}lmer_lurking.h5"
LMER_UNCORR_RANEF_F = modl_path / f"{pfx}lmer_uncorr_ranef.h5"
# load the norming responses; safe_load == load(..., Loader=SafeLoader)
with open("../measures/udck19_norms_item_counts.yml") as stream:
    norms = yaml.safe_load(stream)
# keep only the article norm responses (their item ids carry an _NA_NA_NA suffix)
article_norms = [resp for resp in norms if "_NA_NA_NA" in resp["item_id"]]
Compute and merge the proportion (cloze) of non-article continuations,
such as bare plurals, definite articles, and adjectives.
def a_an_counter(item):
    """Tally indefinite-article initial responses for one norming item.

    Parameters
    ----------
    item : dict
        One norming response record with keys ``expt_id``, ``item_id``,
        ``context_measures`` (with an ``orth`` sub-dict holding
        ``n_responses`` and ``n_NAs``), and ``orth`` -- a list whose first
        element maps initial-word tokens to counts.

    Returns
    -------
    tuple
        (expt_id, item_id, article_item_id, n_resp, n_a, n_an, n_NA)
        where article_item_id is item_id with the ``_NA_NA_NA`` norming
        suffix stripped.
    """
    expt_id = item['expt_id']
    assert expt_id in ["norm_1", "norm_3", "norm_6"], f"{expt_id} is not an article norming experiment"
    item_id = item['item_id']
    # strip the norming suffix to recover the stimulus item id
    article_item_id = re.sub(r"_NA_NA_NA", "", item_id)
    orth = item["context_measures"]["orth"]
    n_resp = orth["n_responses"]
    n_NA = orth["n_NAs"]
    # counts of bare "a" / "an" among first-word responses, 0 if absent
    # (dict.get replaces the original membership-test idiom; the unused
    # modal_initial lookup was dropped)
    initial_resp = item['orth'][0]
    n_a = initial_resp.get("a", 0)
    n_an = initial_resp.get("an", 0)
    return expt_id, item_id, article_item_id, n_resp, n_a, n_an, n_NA
# tabulate per-item article counts from the norming responses
art_counts = pd.DataFrame(
    data=[a_an_counter(resp) for resp in article_norms],
    columns=["expt", "item_id", "article_norm_id", "n_resp", "n_a", "n_an", "n_NA"],
)
# non-article initial responses and their proportion (cloze)
art_counts["n_other"] = art_counts["n_resp"] - (art_counts["n_a"] + art_counts["n_an"])
art_counts["other_cloze"] = art_counts["n_other"] / art_counts["n_resp"]
display(art_counts.shape)
with pd.option_context("max_rows", None):
    display(art_counts.head())
    display(art_counts.tail())
from matplotlib import pyplot as plt
# histogram of non-article cloze, weighted so bar heights sum to 1
fig, ax = plt.subplots(figsize=(8, 6))
n_items = len(art_counts)
n, bins, patches = ax.hist(
    art_counts["other_cloze"],
    bins=30,
    weights=np.ones(n_items) / n_items,
)
ax.set_title("Non-indefinite articles cloze probability")
ax.set(xlabel="Cloze Probability", ylabel="Density")
# update RHS_VARS for followup analysis: add the standardized non-article
# cloze and two rescalings of article cloze used by the alternative models
ALT_RHS_VARS = RHS_VARS + ["other_cloze_z", "article_cloze", "article_cloze_c"]
# load epochs prepared for analysis
prepochs_trmd_eeg_df = pd.read_hdf(
    PREPOCHS_TRMD_EEG_F, EEG_SCREEN_COL, mode='r'
).reset_index().set_index(["Epoch_idx", "Time"])
# sanity check single trial epochs as screened in pipeline_1
assert (N_EPOCH_SAMPS, N_TRMD_EEG_EPOCHS) == check_epochs_shape(prepochs_trmd_eeg_df)
assert all([val == 'accept' for val in prepochs_trmd_eeg_df[EEG_SCREEN_COL]])
# presumably the fixed number of stimulus items surviving screening -- TODO confirm
assert len(prepochs_trmd_eeg_df["article_item_id"].unique()) == 794
# index epochs for article norming and merge non-article initial word = "other" cloze values by item
# NOTE(review): the character class [_n] matches '_' or 'n', so this strips
# "_an_<word>" or "_a__<word>" infixes; it also relies on pandas' historical
# default regex=True for str.replace -- confirm against the pinned pandas version.
prepochs_trmd_eeg_df["article_norm_id"] = prepochs_trmd_eeg_df["article_item_id"].str.replace(r"_a[_n]_.+", "")
prepochs_trmd_eeg_df = prepochs_trmd_eeg_df.join(
    art_counts.set_index("article_norm_id")[["other_cloze"]],
    how="left", on="article_norm_id"
)
# pull together presented article, non-indefinite article cloze by stim item
stim_cols = ["article_item_id", "article_norm_id", "article_cloze"]
art_other_cloze = (
    prepochs_trmd_eeg_df
    .query("Time==0")[stim_cols]
    .reset_index(drop=True)
    .drop_duplicates()
)
art_other_cloze = art_other_cloze.join(
    art_counts.set_index("article_norm_id")["other_cloze"],
    how="left",
    on="article_norm_id",
)
art_other_cloze.shape
# standardize
# assumes standardize() adds *_z columns and returns per-column mean/sd
# (the *_z presence is asserted below via ALT_RHS_VARS) -- TODO confirm helper contract
prepochs_trmd_eeg_df, prepochs_trmd_eeg_means_sds = standardize(
    prepochs_trmd_eeg_df,
    ["article_cloze", "ART_noun_cloze", "NA_noun_cloze", "other_cloze"]
)
# for conversion back from standardized betas to the original cloze scale
ART_CLOZE_MN = prepochs_trmd_eeg_means_sds["article_cloze"]["mean"]
ART_CLOZE_SD = prepochs_trmd_eeg_means_sds["article_cloze"]["sd"]
# add centered article cloze, not scaled
prepochs_trmd_eeg_df["article_cloze_c"] = (
    prepochs_trmd_eeg_df["article_cloze"] - prepochs_trmd_eeg_df["article_cloze"].mean()
)
# sanity checks on data prep: every channel and RHS variable is present, every
# epoch item has norming coverage, and the cloze predictors have no missing values
assert all(
    col in prepochs_trmd_eeg_df.columns or col in prepochs_trmd_eeg_df.index.names
    for col in EEG_26_STREAMS + ALT_RHS_VARS
)
assert set(prepochs_trmd_eeg_df.article_norm_id).issubset(art_counts.article_norm_id)
assert not any(pd.isna(prepochs_trmd_eeg_df.article_cloze))
assert not any(pd.isna(prepochs_trmd_eeg_df.other_cloze))
# slice time-locking events from EEG samples (one row per epoch, at Time == 0)
events_df = prepochs_trmd_eeg_df.query("Time==0").copy()
prepochs_trmd_eeg_df.shape, events_df.shape
# correlation plotter
def show_cloze_corr(col_1, col_2, data):
    """Scatterplot two standardized cloze columns with their Pearson r.

    Returns the (figure, axes, title string) triple so the caller can save
    the figure with a tagged filename.
    """
    x, y = data[col_1], data[col_2]
    r, p = scipy.stats.pearsonr(x, y)
    corr_res = f"Pearson $r =$ {r:.3f}\n$p =$ {p:.4e}\n$r^2 =$ {r**2:0.3f}"
    title_str = f"{col_1} x {col_2} correlation"
    fig, ax = plt.subplots(figsize=(8, 6))
    ax.set_title(title_str, fontsize=14)
    ax.set(
        xlabel=f"{col_1} (standardized)",
        ylabel=f"{col_2} (standardized)",
    )
    ax.scatter(x=x, y=y, alpha=.025)
    # for standardized variables the least-squares line has slope r
    ax.plot(x, x * r)
    ax.annotate(xy=(1.05, 0.75), s=corr_res, xycoords="axes fraction", fontsize=14)
    return fig, ax, title_str
# 2x2 covariance matrix of the standardized article and non-article cloze scores
vcov_z = np.cov(events_df["article_cloze_z"], events_df["other_cloze_z"])
a_s2 = vcov_z[0][0]  # var(article_cloze_z)
o_s2 = vcov_z[1][1]  # var(other_cloze_z)
ao_s12 = vcov_z[0][1]  # cov(article_cloze_z, other_cloze_z)
assert ao_s12 == vcov_z[1][0]  # covariance matrix must be symmetric
# covariance/variance ratio ... article other covar / article var
# C is the regression slope of other on article; used below to rescale
# betas for the bias/spurious estimates
C = ao_s12 / a_s2
display(vcov_z)
display(C)
f, ax, title_str = show_cloze_corr("article_cloze_z", "other_cloze_z", events_df);
fig_tag = f"{FIG_PREFIX} {FIG_COUNT} {title_str}"
FIG_COUNT = udck19_figsave(f, fig_tag, FIG_COUNT)
# lmer fitter with options fixed; each call below supplies only the RHS formula
lmer_fitter = functools.partial(
    fgutil.summary.summarize,
    modeler='lmer',
    LHS=LMER_CHANNELS,
    parallel=True,
    n_cores=N_CORES,
    REML=False  # ML fits -- presumably for the AIC comparisons below, confirm
)
# wrap the (optionally prerun-sliced) epochs for fitgrid modeling
epochs_fg = fitgrid.epochs_from_dataframe(
    prepochs_trmd_eeg_df
    .loc[time_slice, ALT_RHS_VARS + LMER_CHANNELS], # prerun slicing, if any
    epoch_id='Epoch_idx',
    time='Time',
    channels=LMER_CHANNELS
)
print(epochs_fg.table.columns)
# alternative models: RHS formulas for two model sets, each written to its own
# summary file. List positions matter -- they are sliced by index below when
# building the AIC comparison groups.
LMER_ALT = {
    "lmer_lurking": [
        # 0. KIM w/ confound
        (
            " article_cloze_z + other_cloze_z + "
            "(1 | expt) + "
            "(article_cloze_z | sub_id) + "
            "(1 | article_item_id)"
        ),
        # 1. drop other
        (
            " article_cloze_z + "
            "(1 | expt) + "
            "(article_cloze_z | sub_id) + "
            "(1 | article_item_id)"
        ),
        # 2. drop article
        (
            "other_cloze_z + "
            "(1 | expt) + "
            "(article_cloze_z | sub_id) + "
            "(1 | article_item_id)"
        ),
        # 3. drop article, other
        (
            "(1 | expt) + "
            "(article_cloze_z | sub_id) + "
            "(1 | article_item_id)"
        ),
        # 4. 5. other as-if KIM
        "other_cloze_z + (1 | expt) + (other_cloze_z | sub_id) + (1| article_item_id)",
        "(1 | expt) + (other_cloze_z | sub_id) + (1| article_item_id)",
        # 6. 7. other as-if KIP
        "other_cloze_z + (1 | expt) + (1 | sub_id) + (1| article_item_id)",
        "(1 | expt) + (1 | sub_id) + (1| article_item_id)",
    ],
    "lmer_uncorr_ranef": [
        # maximal ranefs w/ uncorrelated intercepts and slopes
        # (lme4 "||" syntax) with article cloze raw, centered, scaled
        # 0. cloze MAX_uncor
        (
            " article_cloze + "
            "(article_cloze || expt) + "
            "(article_cloze || sub_id) + "
            "(article_cloze || item_id)"
        ),
        # 1. centered cloze MAX_uncor
        (
            " article_cloze_c + "
            "(article_cloze_c || expt) + "
            "(article_cloze_c || sub_id) + "
            "(article_cloze_c || item_id)"
        ),
        # 2. standardized cloze MAX_uncor
        (
            " article_cloze_z + "
            "(article_cloze_z || expt) + "
            "(article_cloze_z || sub_id) + "
            "(article_cloze_z || item_id)"
        ),
        # KIM ranefs with article cloze, centered and scaled
        # 3. cloze KIM
        (
            " article_cloze + "
            "(1 | expt) + "
            "(article_cloze || sub_id) + "
            "(1 | item_id)"
        ),
        # 4. centered cloze KIM
        (
            " article_cloze_c + "
            "(1 | expt) + "
            "(article_cloze_c || sub_id) + "
            "(1 | item_id)"
        ),
        # 5. standardized cloze KIM
        (
            " article_cloze_z + "
            "(1 | expt) + "
            "(article_cloze_z || sub_id) + "
            "(1 | item_id)"
        ),
        # 6. cloze KIM
        (
            " article_cloze + "
            "(1 | expt) + "
            "(article_cloze | sub_id) + "
            "(1 | item_id)"
        ),
        # 7. centered cloze KIM
        (
            " article_cloze_c + "
            "(1 | expt) + "
            "(article_cloze_c | sub_id) + "
            "(1 | item_id)"
        ),
        # 8. standardized cloze KIM (easier to recompute than merge summaries)
        (
            " article_cloze_z + "
            "(1 | expt) + "
            "(article_cloze_z | sub_id) + "
            "(1 | item_id)"
        ),
    ]
}
start_time = datetime.datetime.now()
LOGGER.info(f"Start modeling: {start_time.strftime('%d.%b %Y %H:%M:%S')}")
# fit each alternative model set; summaries land in one HDF5 file per set
for model_set in LMER_ALT.keys():
    LOGGER.info(f"""{model_set}""")
    print(model_set)
    # suppress pandas FutureWarning for rpy2 DataFrame.from_items
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        fit_lmer_formulas(
            epochs_fg,
            lmer_fitter,
            LMER_ALT[model_set],
            modl_path / (pfx + model_set + ".h5"),
            LOGGER
        )
elapsed = datetime.datetime.now() - start_time
LOGGER.info(f"Elapsed time modeling: {elapsed}")
# set plotchans() styling per beta: figure margins, waveform y-limits,
# calibration-bar label/ticks, and channel-label placement
beta_kws = {
    "(Intercept)": {
        'margins': {'bottom': 0.15},
        'axes': {"ylim": (-4, 4)},  # these scale y-extent of the waveforms
        'cal': {
            # raw strings: "\m" / "\_" are invalid escape sequences that warn
            # on modern Python; the rendered mathtext is unchanged
            'ylabel': r"$\mu V$",
            'yticks': (-2, 2),
        },
        'chan_label': 'north',
    },
    "article_cloze_z": {
        'margins': {'bottom': 0.15},
        'axes': {"ylim": (-.25, 0.75)},
        'cal': {
            'ylabel': r"$\mu V / SD_{article\_cloze}$",
            'yticks': (0, .5),
        },
        'chan_label': 'north'
    },
    "other_cloze_z": {
        'margins': {'bottom': 0.15},
        'axes': {"ylim": (-.25, 0.75)},
        'cal': {
            # label intentionally mirrors article_cloze; the plotting loop
            # below rewrites "article" -> "other" in the ylabel
            'ylabel': r"$\mu V / SD_{article\_cloze}$",
            'yticks': (0, .5),
        },
        'chan_label': 'north'
    },
}
# channel layout for plotchans: midline-only for the quick prerun pass,
# full 32-channel montage otherwise
if PRERUN:
    layout = MPL_MIDLINE
    # fewer channels, so move the channel labels to the east side
    for _beta_name, _beta_kw in beta_kws.items():
        _beta_kw['chan_label'] = 'east'
else:
    layout = MPL_32_CHAN
# N400 window highlight ... alpha=0 to disable
n4_highlight = dict(xmin=300, xmax=500, color='magenta', alpha=0.2)
# all three summary files must exist -- guards against running the plotting
# half of the notebook before the modeling half
assert LMER_LURKING_F.exists()
assert LMER_ACZ_RANEF_F.exists()
assert LMER_UNCORR_RANEF_F.exists()
LOGGER.info(f"""
Plotting fitted models from : {LMER_LURKING_F, LMER_ACZ_RANEF_F, LMER_UNCORR_RANEF_F}
""")
# select model sets for AIC comparison: (summary file, list of formulas)
ALT_LMER_COMPS = {
    # lurking variable comparisons
    "article_other_stack": (LMER_LURKING_F, LMER_ALT["lmer_lurking"][0:4]), # stack
    "other_KIM_comp": (LMER_LURKING_F, LMER_ALT["lmer_lurking"][4:6]), # KIM pair
    "other_KIP_comp": (LMER_LURKING_F, LMER_ALT["lmer_lurking"][6:8]), # KIP pair
    # article cloze on 3 scales w/ corr_KIM = w/ KIM ranefs
    "uncorr_MAX": (LMER_UNCORR_RANEF_F, LMER_ALT["lmer_uncorr_ranef"][0:3]),
    "uncorr_KIM": (LMER_UNCORR_RANEF_F, LMER_ALT["lmer_uncorr_ranef"][3:6]),
    "corr_KIM": (LMER_UNCORR_RANEF_F, LMER_ALT["lmer_uncorr_ranef"][6:9]),
}
LOGGER.info(
    f"Alternative model AIC comparisons\n{ALT_LMER_COMPS}"
)
# AIC min-delta figures, once for the whole epoch and once for the
# -200..600 ms analysis window
for x0, x1 in [(-1500, 1500), (-200, 600)]:
    for comptag, (rerp_file, comp) in ALT_LMER_COMPS.items():
        print(comptag, comp)
        try:
            f, axs = fgutil.summary.plot_AICmin_deltas(
                read_fg_summaries_hdf(rerp_file, comp).query("Time >= @x0 and Time <= @x1")
            )
        except:
            # bare except is tolerable here: the failing group is reported,
            # then the original exception is re-raised unchanged
            print("bad group: ", comptag, comp, "\n")
            raise
        f.set_size_inches(16, int(5 * len(axs)))
        fig_tag = f"{FIG_PREFIX} {FIG_COUNT} {comptag} AIC {x0} {x1}"
        n_rows, n_cols = axs.shape
        # decorate the left-column axes row by row
        for row in range(n_rows):
            ax = axs[row, 0]
            # instead of fig.suptitle
            if row == 0:
                ax.text(
                    s=fig_tag,
                    x=0,
                    y=1.15,
                    fontsize='x-large',
                    fontweight='bold',
                    ha="left",
                    va="bottom",
                    transform=ax.transAxes
                )
            # panel label, e.g. "a)", "b)", ...
            ax.text(
                x=-0.1,
                y=1.0,
                s=f"{panel_from_idx(row)})",
                horizontalalignment='right',
                fontsize='x-large',
                fontweight='bold',
                transform=ax.transAxes
            )
            # shade the N400 window in the first two columns of this row
            for axj in [0,1]:
                axs[row, axj].axvspan(**n4_highlight)
            ax.set(ylim=(0,25))
            # single legend: drop any per-axes legend, re-add on the last row
            if ax.get_legend():
                ax.get_legend().remove()
            if row == n_rows - 1:
                ax.legend(loc='upper left', bbox_to_anchor=(0, -0.1), ncol=4)
        FIG_COUNT = udck19_figsave(f, fig_tag, FIG_COUNT)
# make sure C is correct
# NOTE(review): hard-coded guard for the slope computed above; brittle if the
# screened epochs ever change -- confirm this value is meant to be pinned
assert np.isclose(C, -0.26445942781151827)
# models to plot as lmerERPs; the *_bias / *_spurious entries deliberately
# reuse the same fitted summaries -- their estimates are rescaled by C in the
# plotting loop below
LMER_LURKING_MODELS = {
    "other_KIM": (LMER_LURKING_F, LMER_ALT["lmer_lurking"][4]),
    "other_KIM_bias": (LMER_LURKING_F, LMER_ALT["lmer_lurking"][4]),
    "other_KIP": (LMER_LURKING_F, LMER_ALT["lmer_lurking"][6]),
    "other_KIP_bias": (LMER_LURKING_F, LMER_ALT["lmer_lurking"][6]),
    "article_KIM": (LMER_ACZ_RANEF_F, LMER_MODELS["lmer_acz_ranef"][5]),
    "article_KIM_spurious": (LMER_ACZ_RANEF_F, LMER_MODELS["lmer_acz_ranef"][5]),
    "article_KIP": (LMER_ACZ_RANEF_F, LMER_MODELS["lmer_acz_ranef"][7]),
    "article_KIP_spurious": (LMER_ACZ_RANEF_F, LMER_MODELS["lmer_acz_ranef"][7]),
    "article_other_KIM": (LMER_LURKING_F, LMER_ALT["lmer_lurking"][0]),
}
LOGGER.info(f"lmerERPs for {LMER_LURKING_MODELS}")
intervals = [(-1500, 1500), (-200, 600)]
for x0, x1 in intervals:
    for mtag, (rerp_file, model) in LMER_LURKING_MODELS.items():
        print(mtag, model)
        rerps = read_fg_summaries_hdf(
            rerp_file, [model],
        ).query('Time >= @x0 and Time <= @x1')
        # plot bias for non-article cloze KIM and KIP
        # for *_bias / *_spurious tags, overwrite the plotted estimates (and
        # both CI bounds) with the C-rescaled beta before plotting
        if "bias" in mtag or "spurious" in mtag:
            est_rerps = None
            si = pd.IndexSlice
            # assumes summary index levels are (model, Time, beta, key) -- TODO confirm
            if "bias" in mtag:
                # estimate bias of non-article cloze: other beta scaled by C
                rerps = rerps.query("beta == 'other_cloze_z'")
                rerp_betas = rerps.loc[si[:, :, "other_cloze_z", "Estimate"], :]
                est_rerps = rerp_betas * C
            elif "spurious" in mtag:
                # if article cloze is spurious B2 what caused it
                rerps = rerps.query("beta == 'article_cloze_z'")
                rerp_betas = rerps.loc[si[:, :, "article_cloze_z", "Estimate"], :]
                est_rerps = rerp_betas / C
            else:
                raise ValueError(f"unknown mtag: {mtag}")
            # set the values
            # NOTE(review): writes into the .query() result, not the original
            # frame -- relies on .query returning a copy
            for key in ["Estimate", "2.5_ci", "97.5_ci"]:
                rerps.loc[pd.IndexSlice[:, :, :, key], :] = est_rerps
        plots = plotchans(rerps, beta_kws, style=style, layout=layout, se_ci="CI")
        for plot in plots:
            beta = plot['beta']
            f = plot['fig']
            for ax in f.get_axes():
                ax.axvspan(**n4_highlight)
                # atrocious hack: beta_kws reuses the article_cloze ylabel
                if "other" in beta:
                    ax.set(ylabel=ax.get_ylabel().replace("article", "other"))
            fig_tag = f"{FIG_PREFIX}_{FIG_COUNT}_{mtag}_{beta}_{x0}_{x1}"
            # prepend the figure tag to the existing suptitle in place
            suptitle_txt = f._suptitle.get_text()
            x, y = f._suptitle.get_position()
            f.suptitle(
                f"{FIG_PREFIX} {FIG_COUNT}\n{mtag} {suptitle_txt}",
                x=x,
                y=y,
                fontsize='x-large',
                fontweight='bold',
                ha="left",
                va="bottom",
            )
            FIG_COUNT = udck19_figsave(
                f,
                f"{FIG_PREFIX}_{FIG_COUNT}_{mtag}_{beta}_{x0}_{x1}",
                FIG_COUNT
            )
Apropos of the "subtle drawback" of dropping correlated intercepts and slopes:
"Models in which the slopes and intercepts are allowed to have a nonzero correlation (e.g., fm1) are invariant to additive shifts of the continuous predictor (Days in this case). This invariance breaks down when the correlation is constrained to zero; any shift in the predictor will necessarily lead to a change in the estimated correlation, and in the likelihood and predictions of the model." (Bates, D., Mächler, M., Bolker, B., & Walker, S. (2015). Fitting Linear Mixed-Effects Models Using lme4. 2015, 67(1), 48. doi:10.18637/jss.v067.i01, p. 9).
Compare article cloze (range 0 - 1), centered article cloze (mean = 0), and standardized article cloze for:
1. the maximal model, except dropping the experiment, subject, and item correlated random intercepts and slopes
2. KIM, except dropping the correlated random intercepts and slopes
3. KIM as originally specified
def key_plots(mtag, df, fixeffs, keys, chans, models, fig_count):
    """Plot a grid of fit-summary keys (rows) by channels (columns).

    Each cell overlays one line per model; lmer warning timepoints are marked
    with red dots, and for the Estimate row the article_cloze_z beta rescaled
    back to microvolts is overplotted with red '+' markers. Saves the figure
    via udck19_figsave and returns the incremented figure counter.
    """
    # rescale article_cloze_z back to uV and append as pseudo-key "Estimate_uv"
    if "article_cloze_z" in df.index.unique("beta"):
        article_cloze_est_uv = (
            df.query("key=='Estimate' and beta=='article_cloze_z'") / ART_CLOZE_SD
        )
        article_cloze_est_uv.reset_index('key', inplace=True)
        article_cloze_est_uv["key"] = "Estimate_uv"
        article_cloze_est_uv.set_index("key", append=True, inplace=True)
        df = pd.concat([df, article_cloze_est_uv])
    fig_tag = f"{FIG_PREFIX}_{fig_count}_{mtag}"
    f, axs = plt.subplots(len(keys), len(chans), figsize=(16, 3*len(keys)), sharey='row')
    for ki, key in enumerate(keys):
        data = df.query("key == @key and beta in @fixeffs")
        warns = df.query("key == 'has_warning' and beta in @fixeffs")
        # hack for rescaled article_cloze_z
        if key == "Estimate":
            est_uv = df.query("key=='Estimate_uv' and beta=='article_cloze_z'")
        else:
            est_uv = None
        for ci, chan in enumerate(chans):
            #print(axs.shape)
            ax = axs[ki, ci]
            #ax.plot(data.Time.unique(), ptschan, ax=ax)
            for mi, model in enumerate(data.index.unique("model")):
                model_data = data.reset_index("Time").query("model == @model")
                warnings_data = warns.reset_index("Time").query("model==@model")
                times = model_data["Time"]
                pts = model_data[chan]
                # data transforms: log scale for p-values and degrees of freedom
                if key in ["P-val", "DF"]:
                    pts = np.log10(pts)
                    ylabel = f"log10({key})"
                else:
                    ylabel = key
                ax.plot(times, pts, label=model) # .split()[0])
                # warnings
                # NOTE(review): times[i] / pts[i] rely on pandas positional
                # fallback for integer lookups on these Series, and assume
                # warnings_data aligns row-for-row with model_data -- confirm
                warn_pts = [
                    (times[i], pts[i]) for i, pt, in enumerate(warnings_data[chan]) if pt == 1.0
                ]
                for t, wpt in warn_pts:
                    ax.plot(t, wpt, lw=0, marker='o', color='red')
            # cloze back on uV scale
            # NOTE(review): `times` leaks from the last model iteration above
            if est_uv is not None:
                ax.plot(times, est_uv[chan], color='red', lw=0, marker="+")
            #model_data.plot(x="Time", y=chan, ax=ax, label=model.split()[0])
            # left column gets key label
            if ci == 0:
                ax.set(ylabel=f"{ylabel}")
            # bottom row gets channel
            if ki == len(keys)-1:
                ax.set(xlabel=chan)
            # first row, col triggers title and legend
            if ki == 0 and ci == 0:
                ax.margins(y=.5)
                leg = ax.legend(loc=(0, 1.025)) #"upper left")
                leg.set_in_layout(False)
    f.suptitle(
        t=fig_tag,
        x=0.1, y=.95,
        fontsize='x-large',
        fontweight='bold',
        ha="left",
        va="bottom",
    )
    fig_count = udck19_figsave(f, fig_tag, fig_count)
    return fig_count
# summary-key grids for the three article-cloze scaling comparisons,
# midline channels only
fixeffs = ["article_cloze_z", "article_cloze", "article_cloze_c"]
keys = ["Estimate", "SE", "T-stat", "P-val", "DF"]
chans = ["MiPf", "MiCe", "MiPa", "MiOc"]
for mtag in ["uncorr_MAX", "uncorr_KIM", "corr_KIM"]:
    rerp_file, models = ALT_LMER_COMPS[mtag]
    print(rerp_file, models)
    rerps = read_fg_summaries_hdf(rerp_file, models)
    FIG_COUNT = key_plots(mtag, rerps, fixeffs, keys, chans, models, FIG_COUNT)
# log execution time: total wall time for the whole notebook run
pipeline_stop = datetime.datetime.now()
elapsed = pipeline_stop - pipeline_start
LOGGER.info(f"""
Done {pipeline_stop.strftime("%d.%b %Y %H:%M:%S")}
Elapsed time: {elapsed}
""")